import tensorflow as tf
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
import cv2
import os
import pathlib
from keras.layers import Conv2D, Conv2DTranspose, Dropout, Dense, Reshape, LayerNormalization, LeakyReLU
from keras import layers, models
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import accuracy_score, classification_report
from sklearn.metrics import f1_score, recall_score, precision_score
from imblearn.over_sampling import SMOTE
# Check if GPU is available
# (tf.test.gpu_device_name() returns a device string such as '/device:GPU:0',
# or an empty string when no GPU is visible to TensorFlow).
print(tf.test.gpu_device_name())
/device:GPU:0
import os
# Pin the process to the first GPU.
# NOTE(review): CUDA_VISIBLE_DEVICES only takes effect if it is set before
# TensorFlow initializes CUDA; tensorflow was already imported above, so this
# assignment may have no effect here — confirm and move before the import.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
class ReadDataset:
    """Load an image-classification dataset laid out as one folder per class.

    Each entry in `labels` names a sub-folder of `datasetpath`; every file in
    that folder is read with OpenCV, resized to `image_shape`, converted
    BGR -> RGB and scaled to [0, 1]. The numeric label of an image is the
    index of its class name in `labels`.
    """

    def __init__(self, datasetpath, labels, image_shape):
        # Root folder containing one sub-folder per class name.
        self.datasetpath = datasetpath
        # Class names in label-index order (index 0 -> labels[0], ...).
        self.labels = labels
        # (width, height) target passed to cv2.resize.
        self.image_shape = image_shape

    def returListImages(self):
        """Collect, per class, the list of image file paths under its folder."""
        self.images = []
        for label in self.labels:
            class_dir = pathlib.Path(os.path.join(self.datasetpath, label))
            self.images.append(list(class_dir.glob('*.*')))

    def readImages(self):
        """Read every image on disk and return (images, labels) arrays.

        Returns:
            images: float array of shape (n, H, W, 3) with values in [0, 1].
            labels: int array of shape (n,) holding class indices.
        """
        self.returListImages()
        self.finalImages = []
        labels = []
        for label in range(len(self.labels)):
            for img_path in self.images[label]:
                img = cv2.imread(str(img_path))
                if img is None:
                    # cv2.imread returns None for unreadable/corrupt files;
                    # skip them instead of crashing inside cv2.resize.
                    continue
                img = cv2.resize(img, self.image_shape)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = img / 255
                self.finalImages.append(img)
                labels.append(label)
        images = np.array(self.finalImages)
        labels = np.array(labels)
        return images, labels
# Load the training split: one folder per class, resized to 64x64 RGB in [0, 1].
readDatasetObject = ReadDataset('chest_xray/train',
['NORMAL', 'PNEUMONIA'],
(64, 64))
images, labels = readDatasetObject.readImages()
# Notebook display: expected shapes (n_images, 64, 64, 3) and (n_images,).
images.shape, labels.shape
((5216, 64, 64, 3), (5216,))
# Show 64 randomly chosen training images, titled with their integer class
# index (0 = NORMAL, 1 = PNEUMONIA, per the order passed to ReadDataset).
plt.figure(figsize=(12, 12))
indexs = np.random.randint(0, len(labels), size=(64,))
for i in range(64):
    plt.subplot(8, 8, (i + 1))
    plt.imshow(images[indexs[i]])
    plt.title(labels[indexs[i]])
# BUGFIX: the original called plt.legend() here, but no artist carries a
# label, so it only emitted the "No artists with labels found" warning.
No artists with labels found to put in legend. Note that artists whose label start with an underscore are ignored when legend() is called with no argument.
<matplotlib.legend.Legend at 0x1834f63c130>
# Balance the two classes with SMOTE: flatten each image to a feature vector,
# oversample the minority class by interpolation, then restore image shape.
smote = SMOTE()
images_flat = images.reshape(images.shape[0], -1)
images_resampled, labels_resampled = smote.fit_resample(images_flat, labels)
# NOTE(review): SMOTE interpolates in raw pixel space, so the synthetic
# samples are pixel-wise blends of real X-rays — confirm this is acceptable.
images_resampled = images_resampled.reshape(-1, 64, 64, 3)
Exception in thread Thread-7 (_readerthread):
Traceback (most recent call last):
File "c:\Users\dario\mambaforge\lib\threading.py", line 1016, in _bootstrap_inner
self.run()
File "c:\Users\dario\mambaforge\lib\threading.py", line 953, in run
self._target(*self._args, **self._kwargs)
File "c:\Users\dario\mambaforge\lib\subprocess.py", line 1515, in _readerthread
buffer.append(fh.read())
File "c:\Users\dario\mambaforge\lib\codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 16: invalid start byte
c:\Users\dario\mambaforge\lib\site-packages\joblib\externals\loky\backend\context.py:136: UserWarning: Could not find the number of physical cores for the following reason:
found 0 physical cores < 1
Returning the number of logical cores instead. You can silence this warning by setting LOKY_MAX_CPU_COUNT to the number of cores you want to use.
warnings.warn(
File "c:\Users\dario\mambaforge\lib\site-packages\joblib\externals\loky\backend\context.py", line 282, in _count_physical_cores
raise ValueError(f"found {cpu_count_physical} physical cores < 1")
# Mild geometric augmentation applied on the fly to the SMOTE-balanced set.
data_gen_args = dict(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
zoom_range=0.1,
horizontal_flip=True,
fill_mode='nearest'
)
image_datagen = ImageDataGenerator(**data_gen_args)
# Yields (images, labels) batches of 64 with fresh random augmentation.
augmented_data_generator = image_datagen.flow(images_resampled, labels_resampled, batch_size=64)
Sample images included in the dataset for each class
The proposed generative adversarial network is designed so that the usual difficulty of GAN training, and many of the problems that can arise in it, can be avoided — in particular mode collapse and poor perceptual quality.
Some parts of the architecture are aimed at avoiding the problem of mode collapse, while the other parts focus on perceptual quality: the discriminator is forced to attend to the deeper features of the medical images, which in turn helps the generator capture those features during generation.
The architecture also helped keep training balanced between the generator and the discriminator.
Conditional generation was used, whereby the discriminator was forced to verify both that the generated images were realistic and that they followed the requested class (a healthy person, or a person with pneumonia).
The generator's input was noise drawn from a uniform distribution, concatenated with the pathological condition we want to generate.
An MSE loss function was used to address perceptual quality, making the generator focus on the healthy characteristics of a normal case and the pathological characteristics of a person suffering from pneumonia.
class Acgan:
    """Conditional auxiliary-classifier GAN (ACGAN) for 64x64 X-ray images.

    The generator maps a latent noise vector plus a one-hot class label to an
    image; the discriminator returns both a real/fake probability and a
    2-way class prediction (NORMAL / PNEUMONIA).
    """

    def __init__(self, eta, batch_size, epochs, latent_space,
                 image_shape, kernel_size):
        self.eta = eta                    # learning rate for both RMSprop optimizers
        self.batch_size = batch_size      # kept for API compatibility (batch size comes from the data generator)
        self.epochs = epochs              # number of training iterations (one batch each)
        self.latent_space = latent_space  # dimensionality of the noise vector
        self.image_shape = image_shape    # image shape, e.g. (64, 64, 3)
        self.kernel_size = kernel_size    # conv / transposed-conv kernel size

    def data(self, augmented_data_generator):
        """Attach the augmented, class-balanced (images, labels) batch generator."""
        self.augmented_data_generator = augmented_data_generator

    def samples(self, G, noize, labels):
        """Plot 16 images generated from `noize`, titled with their class index."""
        images = G.predict([noize, labels])
        ys = np.argmax(labels, axis=1)
        plt.figure(figsize=(12, 4))
        for i in range(16):
            plt.subplot(2, 8, (i + 1))
            plt.imshow(images[i])
            plt.title(ys[i])
        plt.show()

    def generator(self, inputs, labels):
        """Build the conditional generator ([noise, one-hot label] -> image).

        A dense projection to an 8x8 feature map is followed by four
        LayerNorm/ReLU/Conv2DTranspose stages (upsampling while filters >= 64),
        finishing with a sigmoid RGB image in [0, 1].
        """
        filters = [256, 128, 64, 32]
        padding = 'same'
        x = layers.concatenate([inputs, labels])  # condition the noise on the class
        x = layers.Dense(1024)(x)
        x = layers.Dropout(0.5)(x)  # regularization
        x = layers.Dense(8 * 8 * filters[0],
                         kernel_regularizer=tf.keras.regularizers.L2(0.001))(x)
        x = layers.Reshape((8, 8, filters[0]))(x)
        for filter_count in filters:
            # Stride-2 upsampling for the three widest stages: 8->16->32->64.
            strides = 2 if filter_count >= 64 else 1
            x = LayerNormalization()(x)
            x = layers.Activation('relu')(x)
            x = Conv2DTranspose(filter_count, kernel_size=self.kernel_size,
                                padding=padding, strides=strides)(x)
            x = layers.Dropout(0.5)(x)  # regularization
        x = Conv2DTranspose(3, kernel_size=self.kernel_size, padding=padding)(x)
        # Sigmoid output matches the [0, 1] scaling of the training images.
        x = layers.Activation('sigmoid')(x)
        self.generatorModel = models.Model(inputs=[inputs, labels],
                                           outputs=x,
                                           name='generator')

    def discriminator(self, inputs):
        """Build the discriminator: image -> (real/fake probability, class softmax)."""
        x = inputs
        filters = [32, 64, 128, 256]
        padding = 'same'
        for filter_count in filters:
            # Stride-2 downsampling for the three narrowest stages: 64->32->16->8.
            strides = 2 if filter_count < 256 else 1
            x = Conv2D(filter_count, kernel_size=self.kernel_size, padding=padding,
                       strides=strides,
                       kernel_regularizer=tf.keras.regularizers.L2(0.001))(x)
            x = LeakyReLU(alpha=0.2)(x)
            x = layers.Dropout(0.5)(x)  # regularization
        x = layers.Flatten()(x)
        # Real/fake head. BUGFIX: the original used a linear Dense(1) while
        # compiling with binary_crossentropy (which expects probabilities,
        # from_logits defaults to False); a sigmoid makes the output valid.
        outputs = Dense(1, activation='sigmoid')(x)
        # Auxiliary class head (2-way softmax).
        labelsOutput = Dense(256,
                             kernel_regularizer=tf.keras.regularizers.L2(0.001))(x)
        labelsOutput = Dropout(0.3)(labelsOutput)
        labelsOutput = Dense(2)(labelsOutput)
        labelsOutput = layers.Activation('softmax')(labelsOutput)
        self.discriminatorModel = models.Model(inputs=inputs,
                                               outputs=[outputs, labelsOutput],
                                               name='discriminator')

    def build(self):
        """Create and compile G, D and the stacked GAN; return (G, D, GAN)."""
        generatorInput = layers.Input(shape=(self.latent_space,))
        discriminatorInput = layers.Input(shape=self.image_shape)
        labelsInput = layers.Input(shape=(2,))
        self.generator(generatorInput, labelsInput)
        self.discriminator(discriminatorInput)
        G = self.generatorModel
        D = self.discriminatorModel
        # Binary cross-entropy on both heads (real/fake score and one-hot class).
        D.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
                  optimizer=tf.keras.optimizers.RMSprop(learning_rate=self.eta))
        D.summary()
        G.summary()
        # Freeze D inside the stacked model so generator updates do not touch
        # discriminator weights; D still trains through its own compile above.
        D.trainable = False
        GAN = models.Model(inputs=[generatorInput, labelsInput],
                           outputs=D(G([generatorInput, labelsInput])))
        GAN.compile(loss=['binary_crossentropy', 'binary_crossentropy'],
                    optimizer=tf.keras.optimizers.RMSprop(learning_rate=self.eta))
        GAN.summary()
        return G, D, GAN

    def trainAlgorithm(self, G, D, GAN):
        """Alternate one discriminator and one generator update per iteration.

        Every 5000 iterations prints the losses and plots sample generations;
        every `save_interval` iterations checkpoints G and D weights.
        """
        save_interval = 1000  # checkpoint frequency, in iterations
        for epoch in range(self.epochs):
            # One batch of real, augmented, class-balanced data.
            realImages, realLabels = next(self.augmented_data_generator)
            # Ensure the labels are one-hot encoded over the 2 classes.
            if len(realLabels.shape) == 1 or realLabels.shape[1] != 2:
                realLabels = tf.keras.utils.to_categorical(realLabels, num_classes=2)
            realTag = tf.ones(shape=(realLabels.shape[0],))
            noize = tf.random.uniform(shape=(realLabels.shape[0], self.latent_space),
                                      minval=-1, maxval=1)
            fakeLabels = tf.keras.utils.to_categorical(
                np.random.choice(range(2), size=(realLabels.shape[0],)), num_classes=2)
            fakeImages = tf.squeeze(G.predict([noize, fakeLabels], verbose=0))
            fakeTag = tf.zeros(shape=(realLabels.shape[0],))
            # Discriminator step on the combined real + fake batch.
            allImages = np.vstack([realImages, fakeImages])
            allLabels = np.vstack([realLabels, fakeLabels])
            allTags = np.hstack([realTag, fakeTag])
            _, dlossTag, dlossLabels = D.train_on_batch(allImages, [allTags, allLabels])
            # Generator step through the frozen-D stacked model: fakes are
            # tagged "real" so G is pushed to fool D.
            _, glossTag, glossLabels = GAN.train_on_batch([noize, fakeLabels],
                                                          [realTag, fakeLabels])
            if epoch % 5000 == 0:
                print('Epoch:', epoch)
                print('Discriminator loss: [tag: {}, labels: {}], Generator loss: [tag: {}, labels: {}]'.format(dlossTag, dlossLabels, glossTag, glossLabels))
                self.samples(G, noize, fakeLabels)
            # BUGFIX: the original tested `epoch % save_interval == 1000`,
            # which can never be true, so weights were never saved.
            if epoch > 0 and epoch % save_interval == 0:
                G.save_weights('generator_weights_epoch_{}.h5'.format(epoch))
                D.save_weights('discriminator_weights_epoch_{}.h5'.format(epoch))
# Build the ACGAN: 100-d latent noise, 64x64x3 images, 5x5 kernels,
# 32000 training iterations.
acgan = Acgan(eta=0.0001, batch_size=128, epochs=32000,
latent_space=100, image_shape=(64, 64, 3), kernel_size=5)
acgan.data(augmented_data_generator)
G, D, GAN = acgan.build()
Model: "discriminator"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_17 (InputLayer) [(None, 64, 64, 3)] 0 []
conv2d_20 (Conv2D) (None, 32, 32, 32) 2432 ['input_17[0][0]']
leaky_re_lu_20 (LeakyReLU) (None, 32, 32, 32) 0 ['conv2d_20[0][0]']
dropout_55 (Dropout) (None, 32, 32, 32) 0 ['leaky_re_lu_20[0][0]']
conv2d_21 (Conv2D) (None, 16, 16, 64) 51264 ['dropout_55[0][0]']
leaky_re_lu_21 (LeakyReLU) (None, 16, 16, 64) 0 ['conv2d_21[0][0]']
dropout_56 (Dropout) (None, 16, 16, 64) 0 ['leaky_re_lu_21[0][0]']
conv2d_22 (Conv2D) (None, 8, 8, 128) 204928 ['dropout_56[0][0]']
leaky_re_lu_22 (LeakyReLU) (None, 8, 8, 128) 0 ['conv2d_22[0][0]']
dropout_57 (Dropout) (None, 8, 8, 128) 0 ['leaky_re_lu_22[0][0]']
conv2d_23 (Conv2D) (None, 8, 8, 256) 819456 ['dropout_57[0][0]']
leaky_re_lu_23 (LeakyReLU) (None, 8, 8, 256) 0 ['conv2d_23[0][0]']
dropout_58 (Dropout) (None, 8, 8, 256) 0 ['leaky_re_lu_23[0][0]']
flatten_5 (Flatten) (None, 16384) 0 ['dropout_58[0][0]']
dense_28 (Dense) (None, 256) 4194560 ['flatten_5[0][0]']
dropout_59 (Dropout) (None, 256) 0 ['dense_28[0][0]']
dense_29 (Dense) (None, 2) 514 ['dropout_59[0][0]']
dense_27 (Dense) (None, 1) 16385 ['flatten_5[0][0]']
activation_35 (Activation) (None, 2) 0 ['dense_29[0][0]']
==================================================================================================
Total params: 5,289,539
Trainable params: 5,289,539
Non-trainable params: 0
__________________________________________________________________________________________________
Model: "generator"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_16 (InputLayer) [(None, 100)] 0 []
input_18 (InputLayer) [(None, 2)] 0 []
concatenate_5 (Concatenate) (None, 102) 0 ['input_16[0][0]',
'input_18[0][0]']
dense_25 (Dense) (None, 1024) 105472 ['concatenate_5[0][0]']
dropout_50 (Dropout) (None, 1024) 0 ['dense_25[0][0]']
dense_26 (Dense) (None, 16384) 16793600 ['dropout_50[0][0]']
reshape_5 (Reshape) (None, 8, 8, 256) 0 ['dense_26[0][0]']
layer_normalization_20 (LayerN (None, 8, 8, 256) 512 ['reshape_5[0][0]']
ormalization)
activation_30 (Activation) (None, 8, 8, 256) 0 ['layer_normalization_20[0][0]']
conv2d_transpose_25 (Conv2DTra (None, 16, 16, 256) 1638656 ['activation_30[0][0]']
nspose)
dropout_51 (Dropout) (None, 16, 16, 256) 0 ['conv2d_transpose_25[0][0]']
layer_normalization_21 (LayerN (None, 16, 16, 256) 512 ['dropout_51[0][0]']
ormalization)
activation_31 (Activation) (None, 16, 16, 256) 0 ['layer_normalization_21[0][0]']
conv2d_transpose_26 (Conv2DTra (None, 32, 32, 128) 819328 ['activation_31[0][0]']
nspose)
dropout_52 (Dropout) (None, 32, 32, 128) 0 ['conv2d_transpose_26[0][0]']
layer_normalization_22 (LayerN (None, 32, 32, 128) 256 ['dropout_52[0][0]']
ormalization)
activation_32 (Activation) (None, 32, 32, 128) 0 ['layer_normalization_22[0][0]']
conv2d_transpose_27 (Conv2DTra (None, 64, 64, 64) 204864 ['activation_32[0][0]']
nspose)
dropout_53 (Dropout) (None, 64, 64, 64) 0 ['conv2d_transpose_27[0][0]']
layer_normalization_23 (LayerN (None, 64, 64, 64) 128 ['dropout_53[0][0]']
ormalization)
activation_33 (Activation) (None, 64, 64, 64) 0 ['layer_normalization_23[0][0]']
conv2d_transpose_28 (Conv2DTra (None, 64, 64, 32) 51232 ['activation_33[0][0]']
nspose)
dropout_54 (Dropout) (None, 64, 64, 32) 0 ['conv2d_transpose_28[0][0]']
conv2d_transpose_29 (Conv2DTra (None, 64, 64, 3) 2403 ['dropout_54[0][0]']
nspose)
activation_34 (Activation) (None, 64, 64, 3) 0 ['conv2d_transpose_29[0][0]']
==================================================================================================
Total params: 19,616,963
Trainable params: 19,616,963
Non-trainable params: 0
__________________________________________________________________________________________________
Model: "model_5"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_16 (InputLayer) [(None, 100)] 0 []
input_18 (InputLayer) [(None, 2)] 0 []
generator (Functional) (None, 64, 64, 3) 19616963 ['input_16[0][0]',
'input_18[0][0]']
discriminator (Functional) [(None, 1), 5289539 ['generator[0][0]']
(None, 2)]
==================================================================================================
Total params: 24,906,502
Trainable params: 19,616,963
Non-trainable params: 5,289,539
__________________________________________________________________________________________________
# Render the architecture diagrams for the stacked GAN, the generator,
# and the discriminator (requires pydot + graphviz).
tf.keras.utils.plot_model(GAN, show_shapes=True)
tf.keras.utils.plot_model(G, show_shapes=True)
tf.keras.utils.plot_model(D, show_shapes=True)

# Run the adversarial training loop on the first GPU.
with tf.device('/GPU:0'):
    acgan.trainAlgorithm(G, D, GAN)
Epoch: 0 Discriminator loss: [tag: 4.350347518920898, labels: 0.6896167397499084], Generator loss: [tag: 1.2831158638000488, labels: 0.7935552597045898] 2/2 [==============================] - 0s 25ms/step
Epoch: 5000 Discriminator loss: [tag: 0.3611697852611542, labels: 0.0747847855091095], Generator loss: [tag: 0.5315833687782288, labels: 0.0011084610596299171] 2/2 [==============================] - 0s 16ms/step
Epoch: 10000 Discriminator loss: [tag: 0.3738906979560852, labels: 0.11210362613201141], Generator loss: [tag: 0.5421265363693237, labels: 0.0030646587256342173] 2/2 [==============================] - 0s 15ms/step
Epoch: 15000 Discriminator loss: [tag: 0.39036989212036133, labels: 0.09206049889326096], Generator loss: [tag: 0.5322836637496948, labels: 0.0006376770325005054] 2/2 [==============================] - 0s 15ms/step
Epoch: 20000 Discriminator loss: [tag: 0.49306049942970276, labels: 0.10983319580554962], Generator loss: [tag: 0.48963266611099243, labels: 0.0017023978289216757] 2/2 [==============================] - 0s 16ms/step
Epoch: 25000 Discriminator loss: [tag: 0.3610495626926422, labels: 0.08360089361667633], Generator loss: [tag: 0.5249466896057129, labels: 0.0004988627624697983] 2/2 [==============================] - 0s 15ms/step
Epoch: 30000 Discriminator loss: [tag: 0.5126365423202515, labels: 0.08138077706098557], Generator loss: [tag: 0.5304501056671143, labels: 0.0009997050510719419] 2/2 [==============================] - 0s 15ms/step
# Persist the trained generator (architecture + weights) as an HDF5 file.
G.save('/kaggle/working/generator.h5')
WARNING:tensorflow:Compiled the loaded model, but the compiled metrics have yet to be built. `model.compile_metrics` will be empty until you train or evaluate the model.
# Reload the generator from disk (it is not compiled; inference only).
G = tf.keras.models.load_model('/kaggle/working/generator.h5')
WARNING:tensorflow:No training configuration found in the save file, so the model was *not* compiled. Compile it manually.
# Prepare inputs for a 30k-image synthetic dataset: uniform noise in [-1, 1]
# plus uniformly random one-hot class labels.
datasetGenerationSize = 30000
noize = tf.random.uniform(shape = (datasetGenerationSize, 100), minval = -1, maxval = 1)
newlabels = tf.keras.utils.to_categorical(np.random.choice([0, 1], size = (datasetGenerationSize, )), num_classes = 2)
noize.shape, newlabels.shape
(TensorShape([30000, 100]), (30000, 2))
# Sanity check: the randomly drawn labels should be roughly balanced.
np.unique(np.argmax(newlabels, axis = 1), return_counts = True)
(array([0, 1], dtype=int64), array([15059, 14941], dtype=int64))
# Synthesize the 30k images conditioned on the drawn labels.
imagesGeneration = G.predict([noize, newlabels])
imagesGeneration.shape
938/938 [==============================] - 22s 16ms/step
(30000, 64, 64, 3)
# Preview the first 64 generated images, titled with their class index.
plt.figure(figsize=(12, 12))
t = np.argmax(newlabels, axis=1)
for i in range(64):
    plt.subplot(8, 8, (i + 1))
    plt.imshow(imagesGeneration[i])
    plt.title(t[i])
# BUGFIX: the original called plt.legend() here, but no artist carries a
# label, so it only emitted the "No artists with labels found" warning.
No artists with labels found to put in legend. Note that artists whose label start with an underscore are ignored when legend() is called with no argument.
<matplotlib.legend.Legend at 0x2b384dca260>
To evaluate the images produced by the generator, we propose a separate neural network dedicated to classifying the generated images. We then return to the original images in the dataset and evaluate this classifier — which was trained only on generated images — to see whether the characteristics it learned from the generated images transfer well enough to give high results on the real images in the dataset.
# Evaluation classifier: a VGG16 trained from scratch (weights=None) with a
# small regularized dense head and a single sigmoid output for the binary
# NORMAL / PNEUMONIA decision. It will be trained on generated images only.
basemodel = tf.keras.applications.VGG16(weights = None, input_shape = (64, 64, 3),
pooling = 'max', include_top = False)
x = layers.Dropout(0.4)(basemodel.output)
x = layers.Dense(128,)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(alpha = 0.2)(x)
x = layers.Dropout(0.4)(x)
x = layers.Dense(32,)(x)
x = layers.BatchNormalization()(x)
x = layers.LeakyReLU(alpha = 0.2)(x)
x = layers.Dropout(0.4)(x)
x = layers.Dense(1, activation = 'sigmoid')(x)
m = tf.keras.models.Model(inputs = basemodel.input, outputs = x)
m.compile(loss = 'binary_crossentropy', optimizer = tf.keras.optimizers.Adam(learning_rate = 0.00001))
m.summary()
Model: "model_1"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 64, 64, 3)] 0
block1_conv1 (Conv2D) (None, 64, 64, 64) 1792
block1_conv2 (Conv2D) (None, 64, 64, 64) 36928
block1_pool (MaxPooling2D) (None, 32, 32, 64) 0
block2_conv1 (Conv2D) (None, 32, 32, 128) 73856
block2_conv2 (Conv2D) (None, 32, 32, 128) 147584
block2_pool (MaxPooling2D) (None, 16, 16, 128) 0
block3_conv1 (Conv2D) (None, 16, 16, 256) 295168
block3_conv2 (Conv2D) (None, 16, 16, 256) 590080
block3_conv3 (Conv2D) (None, 16, 16, 256) 590080
block3_pool (MaxPooling2D) (None, 8, 8, 256) 0
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, 64, 64, 3)] 0
block1_conv1 (Conv2D) (None, 64, 64, 64) 1792
block1_conv2 (Conv2D) (None, 64, 64, 64) 36928
block1_pool (MaxPooling2D) (None, 32, 32, 64) 0
block2_conv1 (Conv2D) (None, 32, 32, 128) 73856
block2_conv2 (Conv2D) (None, 32, 32, 128) 147584
block2_pool (MaxPooling2D) (None, 16, 16, 128) 0
block3_conv1 (Conv2D) (None, 16, 16, 256) 295168
block3_conv2 (Conv2D) (None, 16, 16, 256) 590080
block3_conv3 (Conv2D) (None, 16, 16, 256) 590080
block3_pool (MaxPooling2D) (None, 8, 8, 256) 0
block4_conv1 (Conv2D) (None, 8, 8, 512) 1180160
block4_conv2 (Conv2D) (None, 8, 8, 512) 2359808
block4_conv3 (Conv2D) (None, 8, 8, 512) 2359808
block4_pool (MaxPooling2D) (None, 4, 4, 512) 0
block5_conv1 (Conv2D) (None, 4, 4, 512) 2359808
block5_conv2 (Conv2D) (None, 4, 4, 512) 2359808
block5_conv3 (Conv2D) (None, 4, 4, 512) 2359808
block5_pool (MaxPooling2D) (None, 2, 2, 512) 0
global_max_pooling2d_1 (Glo (None, 512) 0
balMaxPooling2D)
dropout_3 (Dropout) (None, 512) 0
dense_3 (Dense) (None, 128) 65664
batch_normalization_2 (Batc (None, 128) 512
hNormalization)
leaky_re_lu_2 (LeakyReLU) (None, 128) 0
dropout_4 (Dropout) (None, 128) 0
dense_4 (Dense) (None, 32) 4128
batch_normalization_3 (Batc (None, 32) 128
hNormalization)
leaky_re_lu_3 (LeakyReLU) (None, 32) 0
dropout_5 (Dropout) (None, 32) 0
dense_5 (Dense) (None, 1) 33
=================================================================
Total params: 14,785,153
Trainable params: 14,784,833
Non-trainable params: 320
_________________________________________________________________
# Train the classifier on generated images only; binary targets are the
# argmax of the one-hot labels. Early stopping on validation loss with the
# best weights restored.
history = m.fit(imagesGeneration, np.argmax(newlabels, axis = 1),
epochs = 60, batch_size = 64,
validation_split = 0.2,
callbacks = [tf.keras.callbacks.EarlyStopping(patience = 2, monitor = 'val_loss', mode = 'min',
restore_best_weights = True)])
Epoch 1/60 375/375 [==============================] - 27s 62ms/step - loss: 0.0965 - val_loss: 0.4387 Epoch 2/60 375/375 [==============================] - 23s 60ms/step - loss: 0.0456 - val_loss: 0.0368 Epoch 3/60 375/375 [==============================] - 25s 68ms/step - loss: 0.0393 - val_loss: 0.0160 Epoch 4/60 375/375 [==============================] - 25s 68ms/step - loss: 0.0351 - val_loss: 0.0146 Epoch 5/60 375/375 [==============================] - 25s 67ms/step - loss: 0.0310 - val_loss: 0.0129 Epoch 6/60 375/375 [==============================] - 26s 68ms/step - loss: 0.0283 - val_loss: 0.0125 Epoch 7/60 375/375 [==============================] - 25s 67ms/step - loss: 0.0258 - val_loss: 0.0107 Epoch 8/60 375/375 [==============================] - 25s 66ms/step - loss: 0.0239 - val_loss: 0.0113 Epoch 9/60 375/375 [==============================] - 25s 66ms/step - loss: 0.0213 - val_loss: 0.0095 Epoch 10/60 375/375 [==============================] - 25s 66ms/step - loss: 0.0200 - val_loss: 0.0088 Epoch 11/60 375/375 [==============================] - 25s 66ms/step - loss: 0.0185 - val_loss: 0.0089 Epoch 12/60 375/375 [==============================] - 25s 66ms/step - loss: 0.0171 - val_loss: 0.0065 Epoch 13/60 375/375 [==============================] - 26s 69ms/step - loss: 0.0159 - val_loss: 0.0062 Epoch 14/60 375/375 [==============================] - 26s 69ms/step - loss: 0.0148 - val_loss: 0.0063 Epoch 15/60 375/375 [==============================] - 26s 69ms/step - loss: 0.0138 - val_loss: 0.0057 Epoch 16/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0125 - val_loss: 0.0053 Epoch 17/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0119 - val_loss: 0.0045 Epoch 18/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0112 - val_loss: 0.0045 Epoch 19/60 375/375 [==============================] - 26s 69ms/step - loss: 0.0107 - val_loss: 0.0050 Epoch 20/60 375/375 
[==============================] - 26s 69ms/step - loss: 0.0097 - val_loss: 0.0036 Epoch 21/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0092 - val_loss: 0.0030 Epoch 22/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0084 - val_loss: 0.0029 Epoch 23/60 375/375 [==============================] - 26s 69ms/step - loss: 0.0082 - val_loss: 0.0027 Epoch 24/60 375/375 [==============================] - 26s 69ms/step - loss: 0.0074 - val_loss: 0.0031 Epoch 25/60 375/375 [==============================] - 27s 71ms/step - loss: 0.0069 - val_loss: 0.0025 Epoch 26/60 375/375 [==============================] - 27s 72ms/step - loss: 0.0065 - val_loss: 0.0025 Epoch 27/60 375/375 [==============================] - 27s 71ms/step - loss: 0.0061 - val_loss: 0.0020 Epoch 28/60 375/375 [==============================] - 27s 72ms/step - loss: 0.0058 - val_loss: 0.0021 Epoch 29/60 375/375 [==============================] - 26s 69ms/step - loss: 0.0054 - val_loss: 0.0016 Epoch 30/60 375/375 [==============================] - 27s 72ms/step - loss: 0.0051 - val_loss: 0.0016 Epoch 31/60 375/375 [==============================] - 27s 73ms/step - loss: 0.0049 - val_loss: 0.0016 Epoch 32/60 375/375 [==============================] - 27s 71ms/step - loss: 0.0046 - val_loss: 0.0014 Epoch 33/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0042 - val_loss: 0.0012 Epoch 34/60 375/375 [==============================] - 27s 71ms/step - loss: 0.0040 - val_loss: 0.0018 Epoch 35/60 375/375 [==============================] - 26s 71ms/step - loss: 0.0038 - val_loss: 9.9022e-04 Epoch 36/60 375/375 [==============================] - 28s 74ms/step - loss: 0.0034 - val_loss: 7.5286e-04 Epoch 37/60 375/375 [==============================] - 27s 72ms/step - loss: 0.0033 - val_loss: 9.7443e-04 Epoch 38/60 375/375 [==============================] - 27s 71ms/step - loss: 0.0031 - val_loss: 7.5037e-04 Epoch 39/60 375/375 
[==============================] - 26s 70ms/step - loss: 0.0029 - val_loss: 6.8682e-04 Epoch 40/60 375/375 [==============================] - 26s 71ms/step - loss: 0.0028 - val_loss: 5.6203e-04 Epoch 41/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0027 - val_loss: 6.5619e-04 Epoch 42/60 375/375 [==============================] - 26s 70ms/step - loss: 0.0024 - val_loss: 7.4219e-04
# Learning curves of the classifier trained on generated images.
plt.figure(figsize = (7, 6))
plt.plot(history.history['loss'], label = 'training loss')
plt.plot(history.history['val_loss'], label = 'validation loss')
plt.title('Results obtained while training a neural network on images generated by the neural network')
plt.legend()
<matplotlib.legend.Legend at 0x2b2ec0699f0>
# Evaluate on the real (SMOTE-balanced) data.
# NOTE(review): image_datagen applies random augmentation here, so the loss
# is measured on augmented real images rather than the raw ones — confirm
# this is intended; evaluating on images_resampled directly avoids it.
m.evaluate(image_datagen.flow(images_resampled, labels_resampled, batch_size = 64), verbose = 1)
122/122 [==============================] - 10s 79ms/step - loss: 3.7441
3.7441446781158447
# Predict on the real balanced images directly so the prediction order lines
# up index-for-index with labels_resampled.
# BUGFIX: the original predicted on image_datagen.flow(images_resampled, ...),
# whose `shuffle` parameter defaults to True — the predictions were therefore
# in a scrambled order relative to the labels, invalidating every metric
# computed from them below (it also applied random augmentation at test time).
y_pred = tf.squeeze(m.predict(images_resampled, batch_size = 64, verbose = 1))
y_pred.shape
122/122 [==============================] - 10s 79ms/step
TensorShape([7750])
# Threshold the sigmoid outputs at 0.5 into hard 0/1 class predictions.
y_pred = y_pred >= 0.5
y_pred = np.array(y_pred, dtype = 'int32')
y_pred
array([1, 1, 1, ..., 1, 1, 1])
# Accuracy of the predictions against the balanced ground truth.
# BUGFIX: the original line was a SyntaxError (keyword arguments inside a
# tuple) and tried to route the label vector through image_datagen.flow;
# sklearn metrics take plain arrays in (y_true, y_pred) order.
accuracy_score(labels_resampled, y_pred)*100
Cell In[45], line 1 accuracy_score(y_pred, (image_datagen.flow(labels_resampled, batch_size = 64), verbose = 1))*100 ^ SyntaxError: invalid syntax. Maybe you meant '==' or ':=' instead of '='?
# Per-class precision/recall/F1.
# BUGFIX: sklearn expects (y_true, y_pred), and the predictions were made on
# the resampled set, so compare against labels_resampled — the original
# passed (y_pred, labels) with the unresampled label vector.
print(classification_report(labels_resampled, y_pred))
precision recall f1-score support
0 0.99 0.37 0.54 3561
1 0.42 0.99 0.59 1655
accuracy 0.57 5216
macro avg 0.70 0.68 0.57 5216
weighted avg 0.81 0.57 0.56 5216
from sklearn.metrics import confusion_matrix
import seaborn as sns

# Confusion matrix in sklearn convention: rows = true class, cols = predicted.
# BUGFIX: the original passed (y_pred, labels) — swapped arguments (which
# transposes the matrix) and the unresampled label vector, whose length does
# not match the resampled predictions.
cm = confusion_matrix(labels_resampled, y_pred)
cm
array([[1323, 2238],
[ 18, 1637]], dtype=int64)
import pandas as pd
# Wrap the confusion matrix in a class-labelled DataFrame for display and
# the heatmap below.
cmObject = pd.DataFrame(cm , index = ['NORMAL', 'PNEUMONIA'],
columns = ['NORMAL', 'PNEUMONIA'])
cmObject.head()
| NORMAL | PNEUMONIA | |
|---|---|---|
| NORMAL | 1323 | 2238 |
| PNEUMONIA | 18 | 1637 |
# Binary F1 / recall / precision.
# BUGFIX: sklearn metrics take (y_true, y_pred); the original swapped the
# arguments and used the unresampled `labels` vector instead of the
# labels_resampled the predictions correspond to.
print('f1_score: {}, recall_score: {}, precision_score: {}'.format(f1_score(labels_resampled, y_pred)*100,
                                                                   recall_score(labels_resampled, y_pred)*100,
                                                                   precision_score(labels_resampled, y_pred)*100))
f1_score: 59.20433996383363, recall_score: 98.91238670694864, precision_score: 42.24516129032258
# Heatmap of the labelled confusion matrix (fmt='g' = plain integer annotations).
sns.heatmap(cmObject, annot = True, cmap="Blues", fmt = 'g')
<Axes: >
The slight variation in classification accuracy between the two classes is due to the generative adversarial network needing more training time, which would help it focus more on the characteristics of each class (since the number of samples in the original dataset differs between the two classes: the healthy case and pneumonia).
# Compile the model
# NOTE(review): re-compiling here resets the optimizer state; it only serves
# to attach a training configuration to the file saved below — confirm that
# is the intent.
m.compile(loss = 'binary_crossentropy', optimizer = tf.keras.optimizers.Adam(learning_rate = 0.00001))
m.summary()
# Save the model
m.save('/kaggle/working/normal_pneumonia_classifier.h5')
Model: "model_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_10 (InputLayer) [(None, 64, 64, 3)] 0
block1_conv1 (Conv2D) (None, 64, 64, 64) 1792
block1_conv2 (Conv2D) (None, 64, 64, 64) 36928
block1_pool (MaxPooling2D) (None, 32, 32, 64) 0
block2_conv1 (Conv2D) (None, 32, 32, 128) 73856
block2_conv2 (Conv2D) (None, 32, 32, 128) 147584
block2_pool (MaxPooling2D) (None, 16, 16, 128) 0
block3_conv1 (Conv2D) (None, 16, 16, 256) 295168
block3_conv2 (Conv2D) (None, 16, 16, 256) 590080
block3_conv3 (Conv2D) (None, 16, 16, 256) 590080
block3_pool (MaxPooling2D) (None, 8, 8, 256) 0
block4_conv1 (Conv2D) (None, 8, 8, 512) 1180160
block4_conv2 (Conv2D) (None, 8, 8, 512) 2359808
block4_conv3 (Conv2D) (None, 8, 8, 512) 2359808
block4_pool (MaxPooling2D) (None, 4, 4, 512) 0
block5_conv1 (Conv2D) (None, 4, 4, 512) 2359808
block5_conv2 (Conv2D) (None, 4, 4, 512) 2359808
block5_conv3 (Conv2D) (None, 4, 4, 512) 2359808
block5_pool (MaxPooling2D) (None, 2, 2, 512) 0
global_max_pooling2d (Globa (None, 512) 0
lMaxPooling2D)
dropout_3 (Dropout) (None, 512) 0
dense_15 (Dense) (None, 128) 65664
batch_normalization (BatchN (None, 128) 512
ormalization)
leaky_re_lu_12 (LeakyReLU) (None, 128) 0
dropout_4 (Dropout) (None, 128) 0
dense_16 (Dense) (None, 32) 4128
batch_normalization_1 (Batc (None, 32) 128
hNormalization)
leaky_re_lu_13 (LeakyReLU) (None, 32) 0
dropout_5 (Dropout) (None, 32) 0
dense_17 (Dense) (None, 1) 33
=================================================================
Total params: 14,785,153
Trainable params: 14,784,833
Non-trainable params: 320
_________________________________________________________________